PROBLEMA - SERIE GAS
# Load packages for the time-series analysis of the gas series.
library(car)         # regression / diagnostic utilities
library(urca)        # unit-root tests
library(forecast)    # ARIMA modelling, forecast(), InvBoxCox()
library(tseries)     # adf.test, pp.test, jarque.bera.test, runs.test
library(ggfortify)   # autoplot methods for ts objects
library(TSstudio)    # time-series visualization helpers
library(highcharter) # interactive charts
# Monthly gas production in Australia, 1956-1995
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1646 2675 16788 21415 38629 66600
Transformación de los datos
## [1] 0.08262296
## [1] 476
Identificación
# Identification: plot the (Box-Cox transformed) series z1 and its ACF/PACF
# at three levels of differencing -- none, ordinary (lag 1), and seasonal
# (lag 12) -- in a 3x3 grid.
# NOTE(review): z1 is created in an earlier (hidden) chunk -- presumably the
# Box-Cox transform of the gas series with lambda ~ 0.0826; confirm.
par(mfrow = c(3, 3))
plot(z1, type = "o")
acf(z1, lag.max = 40)
pacf(z1, lag.max = 40)
plot(diff(z1), type = "o")
# +/- 2 standard-deviation reference bands for the differenced series
abline(h = 2 * sqrt(var(diff(z1))), col = "red", lty = 2)
abline(h = -2 * sqrt(var(diff(z1))), col = "red", lty = 2)
acf(diff(z1), lag.max = 40)
pacf(diff(z1), lag.max = 40)
plot(diff(z1, 12), type = "o")
acf(diff(z1, 12), lag.max = 40)
pacf(diff(z1, 12), lag.max = 40)
# DF test on the seasonally differenced series
Dickey-Fuller
##
## Augmented Dickey-Fuller Test
##
## data: z1
## Dickey-Fuller = -0.72371, Lag order = 7, p-value = 0.9683
## alternative hypothesis: stationary
## Warning in pp.test(z1): p-value smaller than printed p-value
##
## Phillips-Perron Unit Root Test
##
## data: z1
## Dickey-Fuller Z(alpha) = -29.784, Truncation lag parameter = 5, p-value
## = 0.01
## alternative hypothesis: stationary
- De acuerdo a la prueba de Dickey-Fuller el valor p obtenido fue 0.9683, por lo que no es posible rechazar la hipótesis nula y diremos que la serie no es estacionaria
Dickey Fuller con una diferencia
## Warning in adf.test(diff(z1)): p-value smaller than printed p-value
##
## Augmented Dickey-Fuller Test
##
## data: diff(z1)
## Dickey-Fuller = -17.802, Lag order = 7, p-value = 0.01
## alternative hypothesis: stationary
## Warning in pp.test(diff(z1)): p-value smaller than printed p-value
##
## Phillips-Perron Unit Root Test
##
## data: diff(z1)
## Dickey-Fuller Z(alpha) = -302.63, Truncation lag parameter = 5, p-value
## = 0.01
## alternative hypothesis: stationary
Los resultados de las pruebas anteriores nos llevan a concluir que la serie con una diferencia ordinaria es estacionaria
D-F y PP sobre la serie desestacionalizada
##
## Augmented Dickey-Fuller Test
##
## data: diff(z1, 12)
## Dickey-Fuller = -3.5139, Lag order = 7, p-value = 0.04111
## alternative hypothesis: stationary
## Warning in pp.test(diff(z1, 12)): p-value smaller than printed p-value
##
## Phillips-Perron Unit Root Test
##
## data: diff(z1, 12)
## Dickey-Fuller Z(alpha) = -51.992, Truncation lag parameter = 5, p-value
## = 0.01
## alternative hypothesis: stationary
Las pruebas de Dickey Fuller y Phillips Perron nos indican que la serie z1 con una diferencia estacional es estacionaria
## Warning in adf.test(diff(diff(z1, 12))): p-value smaller than printed p-value
##
## Augmented Dickey-Fuller Test
##
## data: diff(diff(z1, 12))
## Dickey-Fuller = -7.8325, Lag order = 7, p-value = 0.01
## alternative hypothesis: stationary
# Full diagnostic panel: series / ACF / PACF for z1 with no difference,
# ordinary difference, seasonal (lag-12) difference, and both combined.
par(mfrow = c(4, 3))
plot(z1, type = "o")
acf(z1, lag.max = 40)
pacf(z1, lag.max = 40)
plot(diff(z1), type = "o")
# +/- 2 standard-deviation reference bands
abline(h = 2 * sqrt(var(diff(z1))), col = "red", lty = 2)
abline(h = -2 * sqrt(var(diff(z1))), col = "red", lty = 2)
acf(diff(z1), lag.max = 40)
pacf(diff(z1), lag.max = 40)
plot(diff(z1, 12), type = "o")
acf(diff(z1, 12), lag.max = 40)
pacf(diff(z1, 12), lag.max = 40)
plot(diff(diff(z1, 12)), type = "o")
abline(h = 2 * sqrt(var(diff(diff(z1, 12)))), col = "red", lty = 2)
abline(h = -2 * sqrt(var(diff(diff(z1, 12)))), col = "red", lty = 2)
acf(diff(diff(z1, 12)), lag.max = 40)
pacf(diff(diff(z1, 12)), lag.max = 40)

# Zoom on the doubly differenced series (ordinary + seasonal), which the
# unit-root tests above indicate is stationary.
par(mfrow = c(1, 3))
plot(diff(diff(z1, 12)), type = "o")
abline(h = 2 * sqrt(var(diff(diff(z1, 12)))), col = "red", lty = 2)
abline(h = -2 * sqrt(var(diff(diff(z1, 12)))), col = "red", lty = 2)
acf(diff(diff(z1, 12)), lag.max = 40)
pacf(diff(diff(z1, 12)), lag.max = 40)
## Series: z1
## ARIMA(0,1,1)(0,1,1)[12]
##
## Coefficients:
## ma1 sma1
## -0.3752 -0.8589
## s.e. 0.0450 0.0451
##
## sigma^2 = 0.0122: log likelihood = 356.1
## AIC=-706.2 AICc=-706.14 BIC=-693.78
SARIMA(0,1,1)(0,1,1) BIC -347.7133 Lags:1,1
SARIMA(0,1,5)(0,1,1) BIC -329.9128 Lags:1:5,1
SARIMA(0,1,5)(0,1,1) BIC -341.6644 Lags:1,3,5;1
SARIMA(0,1,5)(0,1,1) BIC -345.9592 Lags:1,5;1
SARIMA(0,1,11)(0,1,1) BIC -344.8457 Lags:1,5, 10, 11;1
SARIMA(0,1,11)(0,1,1) BIC -349.3178 Lags:1,5, 11;1
SARIMA(0,1,16)(0,1,1) BIC -344.7575 Lags:1,5,11,16;1
SARIMA(16,1,11)(0,1,1) BIC -347.3572 Lags:16;1,5,11;1
SARIMA(16,1,19)(0,1,1) BIC -342.4163 Lags:16;1,5,11,18,19;1
SARIMA(19,1,18)(0,1,1) BIC -342.1835 Lags:16,19;1,5,11,18;1
SARIMA(16,1,18)(0,1,1) BIC -347.9725 Lags:16;1,5,11,18;1
Ajuste del Modelo
# Earlier candidate SARIMA(16,1,18)(0,1,1)[12] kept for reference:
#modelo1<-stats::arima(z1,
#order=c(16,1,18),
#seasonal=list(order=c(0,1,1),
# period=12),
#fixed=c(rep(0,15),NA,NA,0,0,0,NA,0,0,0,0,0,NA,0,0,0,0,0,0,NA,NA))
#modelo1
#tt <- modelo1$coef[which(modelo1$coef!=0)]/sqrt(diag(modelo1$var.coef))
#1 - pt(abs(tt),(modelo1$nobs - length(modelo1$coef[which(modelo1$coef!=0)])))
#BIC(modelo1)

# Final model: SARIMA(0,1,11)(0,1,1)[12] on the transformed series z1.
# In `fixed`, NA = coefficient estimated freely, 0 = constrained to zero,
# so only ma1, ma5, ma11 and sma1 are estimated (12 entries: ma1..ma11, sma1).
modelo1 <- stats::arima(z1,
                        order = c(0, 1, 11),
                        seasonal = list(order = c(0, 1, 1),
                                        period = 12),
                        fixed = c(NA, 0, 0, 0, NA, 0, 0, 0, 0, 0, NA, NA))
modelo1
##
## Call:
## stats::arima(x = z1, order = c(0, 1, 11), seasonal = list(order = c(0, 1, 1),
## period = 12), fixed = c(NA, 0, 0, 0, NA, 0, 0, 0, 0, 0, NA, NA))
##
## Coefficients:
## ma1 ma2 ma3 ma4 ma5 ma6 ma7 ma8 ma9 ma10 ma11 sma1
## -0.388 0 0 0 0.0759 0 0 0 0 0 0.1479 -0.8979
## s.e. 0.043 0 0 0 0.0418 0 0 0 0 0 0.0425 0.0401
##
## sigma^2 estimated as 0.01167: log likelihood = 363.9, aic = -717.8
# t-statistics for the freely estimated coefficients (those not fixed to 0)
# and their upper-tail p-values, with df = nobs - number of free coefficients.
# NOTE(review): this is a one-sided tail probability; for a two-sided test the
# values would be doubled -- confirm the intended convention.
tt <- modelo1$coef[which(modelo1$coef != 0)] / sqrt(diag(modelo1$var.coef))
1 - pt(abs(tt), (modelo1$nobs - length(modelo1$coef[which(modelo1$coef != 0)])))
## ma1 ma5 ma11 sma1
## 0.0000000000 0.0349430736 0.0002696102 0.0000000000
## [1] -697.113
Test de Autocorrelacion de Ljung-Box
\(H_0\): \(r_1=r_2=r_3=...=r_{lag}=0\)
\(H_a\): Al menos una es diferente de cero
#autoplot(modelo1)
#LBQPlot(et)
##
## Box-Ljung test
##
## data: et
## X-squared = 6.4983, df = 11, p-value = 0.8381
Test de Normalidad basado en Sesgo y Curtosis
\(H_0\): Los datos provienen de una Dist. Normal
\(H_a\): Los datos no provienen de una Dist. Normal
##
## Jarque Bera Test
##
## data: et
## X-squared = 637.98, df = 2, p-value < 2.2e-16
Test de Aleatoriedad
\(H_0:\) Los residuales exhiben un comportamiento aleatorio \(H_a:\) Los residuales exhiben estructura (tendencia, o cualquier otro comportamiento predecible)
##
## Runs Test
##
## data: as.factor(sign(et))
## Standard Normal = 0.47301, p-value = 0.6362
## alternative hypothesis: two.sided
inversa de boxcox
# Back-transform the 12-step-ahead forecasts from the Box-Cox scale to the
# original gas-production scale, then plot observed series, fitted values,
# point forecasts and the 95% interval band.
# NOTE(review): the estimated lambda printed earlier is 0.08262296; the
# rounded 0.082 is used here -- consider reusing the exact stored value.
lambda <- 0.082
predt <- forecast(modelo1, h = 12)
z2inv <- forecast::InvBoxCox(predt$mean, lambda)
z2inv.li <- forecast::InvBoxCox(predt$lower[, 2], lambda)  # lower bound, 95% CI
z2inv.ls <- forecast::InvBoxCox(predt$upper[, 2], lambda)  # upper bound, 95% CI
# NOTE(review): x1.fit is not defined in this chunk -- presumably the fitted
# values on the Box-Cox scale from an earlier (hidden) chunk; confirm.
z1.fit <- forecast::InvBoxCox(x1.fit, lambda)
plot(ts(c(gas, z2inv), start = c(1956, 1), freq = 12), type = "l",
     col = "blue", lwd = 2,
     main = "Pronóstico h=12 Pasos al Frente Gas",
     xlab = "Anual",
     ylab = "",
     ylim = c(min(gas, z2inv.li, z2inv.ls), max(gas, z2inv.li, z2inv.ls)))
# Shaded 95% forecast interval band
polygon(c(time(z2inv.li), rev(time(z2inv.ls))),
        c(z2inv.li, rev(z2inv.ls)),
        col = "gray", border = NA)
lines(z2inv, type = "b", col = "blue", lwd = 2)   # point forecasts
lines(z1.fit, type = "l", col = "red", lty = 2, lwd = 3)  # in-sample fit
## Jan Feb Mar Apr May Jun Jul Aug
## 1995
## 1996 41810.09 42947.86 46657.42 48479.57 57221.60 62000.03 65355.52 63329.54
## Sep Oct Nov Dec
## 1995 55824.78 52596.31 47635.67 43681.79
## 1996
## Jan Feb Mar Apr May Jun Jul Aug
## 1995
## 1996 48046.97 49959.67 54843.65 57565.93 68462.56 74784.25 79464.49 77995.18
## Sep Oct Nov Dec
## 1995 60843.99 58207.59 53452.79 49636.48
## 1996
## Warning in dt((x - m)/s, df, log = TRUE): Se han producido NaNs
## Warning in dt((x - m)/s, df, log = TRUE): Se han producido NaNs
## Warning in dt((x - m)/s, df, log = TRUE): Se han producido NaNs
## Warning in log(s): Se han producido NaNs
## Warning in log(s): Se han producido NaNs
## Warning in log(s): Se han producido NaNs
## Warning in log(s): Se han producido NaNs
## Warning in log(s): Se han producido NaNs
## m s df
## 0.0006802252 0.0717720423 3.4216798059
## (0.0039555845) (0.0043494302) (0.6008542194)
## Warning in dt((x - m)/s, df, log = TRUE): Se han producido NaNs
## Warning in dt((x - m)/s, df, log = TRUE): Se han producido NaNs
## Warning in dt((x - m)/s, df, log = TRUE): Se han producido NaNs
## Warning in log(s): Se han producido NaNs
## Warning in log(s): Se han producido NaNs
## Warning in log(s): Se han producido NaNs
## Warning in log(s): Se han producido NaNs
## Warning in log(s): Se han producido NaNs
## m s df
## 0.0006802252 0.0717720423 3.4216798059
## (0.0039555845) (0.0043494302) (0.6008542194)
## [1] 447 170